bitkeeper revision 1.830 (40644790Uo9XYxIfmoQH6z677kJnPg)
author iap10@tetris.cl.cam.ac.uk <iap10@tetris.cl.cam.ac.uk>
Fri, 26 Mar 2004 15:09:04 +0000 (15:09 +0000)
committer iap10@tetris.cl.cam.ac.uk <iap10@tetris.cl.cam.ac.uk>
Fri, 26 Mar 2004 15:09:04 +0000 (15:09 +0000)
cleanup

tools/xc/lib/xc_domain.c
xen/arch/i386/process.c
xen/common/domain.c
xen/common/memory.c
xen/common/schedule.c
xen/common/shadow.c
xen/include/asm-i386/processor.h
xen/include/xen/mm.h
xen/include/xen/shadow.h
xen/net/dev.c

index 4d68f5d937809c823da54a3ee3a069fe52fc4e97..1d7da0442bb5383c5e826c58af6e66dfeee4b7d0 100644 (file)
@@ -102,3 +102,14 @@ int xc_domain_getinfo(int xc_handle,
 
     return nr_doms;
 }
+
+int xc_shadow_control(int xc_handle,
+                     u64 domid, 
+                     unsigned int sop)
+{
+    dom0_op_t op;
+    op.cmd = DOM0_SHADOW_CONTROL;
+    op.u.shadow_control.domain = (domid_t)domid;
+    op.u.shadow_control.op  = sop;
+    return do_dom0_op(xc_handle, &op);
+}
index 6bcb901f3205ba0f0f43e5e4165a1a89cc2f76be..2ff971690a12b395e622e30cb3db8ba43f4a2e68 100644 (file)
@@ -281,13 +281,7 @@ void switch_to(struct task_struct *prev_p, struct task_struct *next_p)
     }
 
     /* Switch page tables.  */
-    if( next_p->mm.shadow_mode )
-      {
-       check_pagetable( next_p, next_p->mm.pagetable, "switch" );
-       write_cr3_counted(pagetable_val(next_p->mm.shadow_table));
-      }
-    else
-      write_cr3_counted(pagetable_val(next_p->mm.pagetable));
+    write_ptbase( &next_p->mm );
 
     set_current(next_p);
 
index 0783f7f838327d600bb4756a3a61e4c05b5bb962..8b9dd6916dd658eb4b2d699da009ee75426d8c91 100644 (file)
@@ -850,7 +850,7 @@ int setup_guestos(struct task_struct *p, dom0_createdomain_t *params,
     set_bit(PF_CONSTRUCTED, &p->flags);
 
 #if 0 // XXXXX DO NOT CHECK IN ENBALED !!! (but useful for testing so leave) 
-    shadow_mode_enable(p, SHM_test); 
+    shadow_mode_enable(&p->mm, SHM_test); 
 #endif
 
     new_thread(p, 
index c8510c514da9fe189b7230edec304ebefc0bd696..01d3aeb181ddadbb577b6c397c55cde191ae173d 100644 (file)
@@ -766,20 +766,22 @@ void free_page_type(struct pfn_info *page, unsigned int type)
     case PGT_l1_page_table:
         free_l1_table(page);
        if ( unlikely(current->mm.shadow_mode) && 
-            (get_shadow_status(current, page-frame_table) & PSH_shadowed) )
+            (get_shadow_status(&current->mm, 
+                               page-frame_table) & PSH_shadowed) )
        {
            unshadow_table( page-frame_table, type );
-           put_shadow_status(current);
+           put_shadow_status(&current->mm);
         }
        return;
 
     case PGT_l2_page_table:
         free_l2_table(page);
        if ( unlikely(current->mm.shadow_mode) && 
-            (get_shadow_status(current, page-frame_table) & PSH_shadowed) )
+            (get_shadow_status(&current->mm, 
+                               page-frame_table) & PSH_shadowed) )
        {
            unshadow_table( page-frame_table, type );
-           put_shadow_status(current);
+           put_shadow_status(&current->mm);
         }
        return;
 
@@ -854,16 +856,10 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
             old_base_pfn = pagetable_val(current->mm.pagetable) >> PAGE_SHIFT;
             current->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
 
-            if( unlikely(current->mm.shadow_mode))
-            {
-                current->mm.shadow_table = 
-                    shadow_mk_pagetable(current, pfn<<PAGE_SHIFT);
-                write_cr3_counted(pagetable_val(current->mm.shadow_table));
-            }
-            else
-            {
-                write_cr3_counted(pfn << PAGE_SHIFT);
-            }
+            shadow_mk_pagetable(&current->mm);
+
+           write_ptbase(&current->mm);
+
             put_page_and_type(&frame_table[old_base_pfn]);    
         }
         else
@@ -1002,12 +998,12 @@ int do_mmu_update(mmu_update_t *ureqs, int count)
                                         mk_l1_pgentry(req.val)); 
 
                    if ( okay && unlikely(current->mm.shadow_mode) &&
-                        (get_shadow_status(current, page-frame_table) &
+                        (get_shadow_status(&current->mm, page-frame_table) &
                          PSH_shadowed) )
                    {
                        shadow_l1_normal_pt_update( req.ptr, req.val, 
                                                    &prev_spfn, &prev_spl1e );
-                       put_shadow_status(current);
+                       put_shadow_status(&current->mm);
                    }
 
                     put_page_type(page);
@@ -1021,11 +1017,11 @@ int do_mmu_update(mmu_update_t *ureqs, int count)
                                         pfn); 
 
                    if ( okay && unlikely(current->mm.shadow_mode) &&
-                        (get_shadow_status(current, page-frame_table) & 
+                        (get_shadow_status(&current->mm, page-frame_table) & 
                          PSH_shadowed) )
                    {
                        shadow_l2_normal_pt_update( req.ptr, req.val );
-                       put_shadow_status(current);
+                       put_shadow_status(&current->mm);
                    }
 
                     put_page_type(page);
@@ -1093,14 +1089,7 @@ int do_mmu_update(mmu_update_t *ureqs, int count)
 
     if ( deferred_ops & DOP_FLUSH_TLB )
     {
-        if ( unlikely(current->mm.shadow_mode) )
-       {
-            check_pagetable( current, 
-                            current->mm.pagetable, "pre-stlb-flush" );
-           write_cr3_counted(pagetable_val(current->mm.shadow_table));
-        }
-        else
-           write_cr3_counted(pagetable_val(current->mm.pagetable));
+        write_ptbase(&current->mm);
     }
 
     if ( deferred_ops & DOP_RELOAD_LDT )
@@ -1172,10 +1161,7 @@ int do_update_va_mapping(unsigned long page_nr,
     if ( unlikely(deferred_ops & DOP_FLUSH_TLB) || 
          unlikely(flags & UVMF_FLUSH_TLB) )
     {
-        if ( unlikely(p->mm.shadow_mode) )
-            write_cr3_counted(pagetable_val(p->mm.shadow_table));
-        else
-            write_cr3_counted(pagetable_val(p->mm.pagetable));
+        write_ptbase(&p->mm);
     }
     else if ( unlikely(flags & UVMF_INVLPG) )
         __flush_tlb_one(page_nr << PAGE_SHIFT);
index 32ebe1192023bd9ef0f17b70f5cd34889b5645d1..2a8e79f9fefd26a0f5f62df4b0add5f134b0637c 100644 (file)
@@ -300,9 +300,8 @@ void sched_pause_sync(struct task_struct *p)
     /* spin until domain is descheduled by its local scheduler */
     while ( schedule_data[cpu].curr == p )
     {
-            set_bit(_HYP_EVENT_NEED_RESCHED, &p->hyp_events);
-            hyp_event_notify(1 << cpu);
-            do_yield();
+               send_hyp_event(p, _HYP_EVENT_NEED_RESCHED );
+               do_yield();
     }
     
     
index 37114fe892ff8eae2ebcd29e94c88fd56d66e4b4..dc6705ab195c21fb3fe6ef3c3e90789fdd74aa4c 100644 (file)
@@ -26,86 +26,107 @@ hypercall lock anyhow (at least initially).
 
 ********/
 
-int shadow_mode_control( struct task_struct *p, unsigned int op )
+static inline void free_shadow_page( struct mm_struct *m, unsigned int pfn )
 {
-       if (p->mm.shadow_mode && op == DOM0_SHADOW_CONTROL_OP_OFF )
+    unsigned long flags;
+
+       m->shadow_page_count--;
+
+    spin_lock_irqsave(&free_list_lock, flags);
+    list_add(&frame_table[pfn].list, &free_list);
+    free_pfns++;
+    spin_unlock_irqrestore(&free_list_lock, flags);
+}
+
+static void __free_shadow_table( struct mm_struct *m )
+{
+       int j;
+       struct shadow_status *a;
+       
+       // the code assumes you're not using the page tables i.e.
+    // the domain is stopped and cr3 is something else!!
+
+    // walk the hash table and call free_shadow_page on all pages
+
+    for(j=0;j<shadow_ht_buckets;j++)
     {
-               shadow_mode_disable(p);
+        a = &m->shadow_ht[j];        
+        if (a->pfn)
+        {
+            free_shadow_page( m, a->spfn_and_flags & PSH_pfn_mask );
+            a->pfn = 0;
+            a->spfn_and_flags = 0;
+        }
+        a=a->next;
+        while(a)
+               { 
+            struct shadow_status *next = a->next;
+            free_shadow_page( m, a->spfn_and_flags & PSH_pfn_mask );
+            a->pfn = 0;
+            a->spfn_and_flags = 0;
+            a->next = m->shadow_ht_free;
+            m->shadow_ht_free = a;
+            a=next;
+               }
        }
-       else if (p->mm.shadow_mode && op == DOM0_SHADOW_CONTROL_OP_ENABLE_TEST )
-       {
-        shadow_mode_disable(p);
-        shadow_mode_enable(p, SHM_test);
-       }       
-       else if (p->mm.shadow_mode && op == DOM0_SHADOW_CONTROL_OP_FLUSH )
-    {
-               //shadow_mode_flush(p);
-    }
-       else
-    {
-               return -EINVAL;
-    }
-
-       return 0;
 }
 
-int shadow_mode_enable( struct task_struct *p, unsigned int mode )
+
+int shadow_mode_enable( struct mm_struct *m, unsigned int mode )
 {
        struct shadow_status **fptr;
        int i;
 
-       // sychronously stop domain
-    // XXX for the moment, only use on already stopped domains!!!
 
-       spin_lock_init(&p->mm.shadow_lock);
-       spin_lock(&p->mm.shadow_lock);
+       spin_lock_init(&m->shadow_lock);
+       spin_lock(&m->shadow_lock);
 
-    p->mm.shadow_mode = mode;
+    m->shadow_mode = mode;
        
        // allocate hashtable
-    p->mm.shadow_ht = kmalloc( shadow_ht_buckets * 
+    m->shadow_ht = kmalloc( shadow_ht_buckets * 
                                                           sizeof(struct shadow_status), GFP_KERNEL );
-       if( ! p->mm.shadow_ht )
+       if( ! m->shadow_ht )
                goto nomem;
 
-       memset( p->mm.shadow_ht, 0, shadow_ht_buckets * 
+       memset( m->shadow_ht, 0, shadow_ht_buckets * 
                                                           sizeof(struct shadow_status) );
 
 
        // allocate space for first lot of extra nodes
-    p->mm.shadow_ht_extras = kmalloc( sizeof(void*) + (shadow_ht_extra_size * 
+    m->shadow_ht_extras = kmalloc( sizeof(void*) + (shadow_ht_extra_size * 
                                                           sizeof(struct shadow_status)), GFP_KERNEL );
 
-       if( ! p->mm.shadow_ht_extras )
+       if( ! m->shadow_ht_extras )
                goto nomem;
 
-       memset( p->mm.shadow_ht_extras, 0, sizeof(void*) + (shadow_ht_extra_size * 
+       memset( m->shadow_ht_extras, 0, sizeof(void*) + (shadow_ht_extra_size * 
                                                           sizeof(struct shadow_status)) );
        
     // add extras to free list
-       fptr = &p->mm.shadow_ht_free;
+       fptr = &m->shadow_ht_free;
        for ( i=0; i<shadow_ht_extra_size; i++ )
        {
-               *fptr = &p->mm.shadow_ht_extras[i];
-               fptr = &(p->mm.shadow_ht_extras[i].next);
+               *fptr = &m->shadow_ht_extras[i];
+               fptr = &(m->shadow_ht_extras[i].next);
        }
        *fptr = NULL;
-       *((struct shadow_status ** ) &p->mm.shadow_ht_extras[shadow_ht_extra_size]) = NULL;
+       *((struct shadow_status ** ) 
+        &m->shadow_ht_extras[shadow_ht_extra_size]) = NULL;
 
-       spin_unlock(&p->mm.shadow_lock);
+       spin_unlock(&m->shadow_lock);
 
     // call shadow_mk_pagetable
-       p->mm.shadow_table = shadow_mk_pagetable( p, 
-                                                                                         pagetable_val(p->mm.pagetable) );
+       shadow_mk_pagetable( m );
 
        return 0;
 
 nomem:
-       spin_unlock(&p->mm.shadow_lock);
+       spin_unlock(&m->shadow_lock);
        return -ENOMEM;
 }
 
-void shadow_mode_disable( )
+static void shadow_mode_disable( struct mm_struct *m )
 {
 
     // free the hash buckets as you go
@@ -113,73 +134,72 @@ void shadow_mode_disable( )
     // free the hashtable itself
 }
 
-
-static inline void free_shadow_page( struct task_struct *p, unsigned int pfn )
+static void shadow_mode_flush( struct mm_struct *m )
 {
-    unsigned long flags;
 
-       p->mm.shadow_page_count--;
+    // since Dom0 did the hypercall, we should be running with it's page
+    // tables right now. Calling flush on yourself would be really
+    // stupid.
 
-    spin_lock_irqsave(&free_list_lock, flags);
-    list_add(&frame_table[pfn].list, &free_list);
-    free_pfns++;
-    spin_unlock_irqrestore(&free_list_lock, flags);
-}
+    if ( m == &current->mm )
+    {
+        printk("Don't try and flush your own page tables!\n");
+        return;
+    }
+   
+    spin_lock(&m->shadow_lock);
+       __free_shadow_table( m );
+       spin_unlock(&m->shadow_lock);
 
-static inline struct pfn_info *alloc_shadow_page( struct task_struct *p )
-{
-       p->mm.shadow_page_count++;
+    // call shadow_mk_pagetable
+    shadow_mk_pagetable( m );
 
-       return alloc_domain_page( NULL );
 }
 
 
-static void __free_shadow_table( struct task_struct *p )
+int shadow_mode_control( struct task_struct *p, unsigned int op )
 {
-       int j;
-       struct shadow_status *a;
-       
-       // the code assumes you're not using the page tables i.e.
-    // the domain is stopped and cr3 is something else!!
+    int  we_paused = 0;
+    // don't call if already shadowed...
 
-    // walk the hash table and call free_shadow_page on all pages
+       // sychronously stop domain
+    if( !(p->state & TASK_STOPPED) && !(p->state & TASK_PAUSED))
+    {
+           sched_pause_sync(p);
+        printk("paused domain\n");
+        we_paused = 1;
+    }
 
-    for(j=0;j<shadow_ht_buckets;j++)
+       if (p->mm.shadow_mode && op == DOM0_SHADOW_CONTROL_OP_OFF )
     {
-        a = &p->mm.shadow_ht[j];        
-        if (a->pfn)
-        {
-            free_shadow_page( p, a->spfn_and_flags & PSH_pfn_mask );
-            a->pfn = 0;
-            a->spfn_and_flags = 0;
-        }
-        a=a->next;
-        while(a)
-               { 
-            struct shadow_status *next = a->next;
-            free_shadow_page( p, a->spfn_and_flags & PSH_pfn_mask );
-            a->pfn = 0;
-            a->spfn_and_flags = 0;
-            a->next = p->mm.shadow_ht_free;
-            p->mm.shadow_ht_free = a;
-            a=next;
-               }
+               shadow_mode_disable(&p->mm);
        }
+       else if (p->mm.shadow_mode && op == DOM0_SHADOW_CONTROL_OP_ENABLE_TEST )
+       {
+        shadow_mode_disable(&p->mm);
+        shadow_mode_enable(&p->mm, SHM_test);
+       }       
+       else if (p->mm.shadow_mode && op == DOM0_SHADOW_CONTROL_OP_FLUSH )
+    {
+               shadow_mode_flush(&p->mm);
+    }
+       else
+    {
+               return -EINVAL;
+    }
+
+    if ( we_paused ) wake_up(p);
+       return 0;
 }
 
-static void flush_shadow_table( struct task_struct *p )
-{
-       
-    // XXX synchronously stop domain (needed for SMP guests)
 
-    // switch to idle task's page tables
-    // walk the hash table and call free_shadow_page on all pages
-       spin_lock(&p->mm.shadow_lock);
-       __free_shadow_table( p );
-       spin_unlock(&p->mm.shadow_lock);
 
-    // XXX unpause domain
+static inline struct pfn_info *alloc_shadow_page( struct mm_struct *m )
+{
+       m->shadow_page_count++;
+
+       return alloc_domain_page( NULL );
 }
 
 
@@ -199,8 +219,8 @@ void unshadow_table( unsigned long gpfn, unsigned int type )
        // even in the SMP guest case, there won't be a race here as
     // this CPU was the one that cmpxchg'ed the page to invalid
 
-       spfn = __shadow_status(current, gpfn) & PSH_pfn_mask;
-       delete_shadow_status(current, gpfn);
+       spfn = __shadow_status(&current->mm, gpfn) & PSH_pfn_mask;
+       delete_shadow_status(&current->mm, gpfn);
 
 #if 0 // XXX leave as might be useful for later debugging
        { 
@@ -220,13 +240,13 @@ void unshadow_table( unsigned long gpfn, unsigned int type )
     else
                perfc_decr(shadow_l2_pages);
 
-       free_shadow_page( current, spfn );
+       free_shadow_page( &current->mm, spfn );
 
 }
 
 
-static unsigned long shadow_l2_table( 
-                     struct task_struct *p, unsigned long gpfn )
+unsigned long shadow_l2_table( 
+                     struct mm_struct *m, unsigned long gpfn )
 {
        struct pfn_info *spfn_info;
        unsigned long spfn;
@@ -234,7 +254,6 @@ static unsigned long shadow_l2_table(
        int i;
 
        SH_VVLOG("shadow_l2_table( %08lx )",gpfn);
-       spin_lock(&p->mm.shadow_lock);
 
        perfc_incrc(shadow_l2_table_count);
        perfc_incr(shadow_l2_pages);
@@ -242,14 +261,14 @@ static unsigned long shadow_l2_table(
     // XXX in future, worry about racing in SMP guests 
     //      -- use cmpxchg with PSH_pending flag to show progress (and spin)
 
-       spfn_info = alloc_shadow_page(p);
+       spfn_info = alloc_shadow_page(m);
 
     ASSERT( spfn_info ); // XXX deal with failure later e.g. blow cache
 
        spfn = (unsigned long) (spfn_info - frame_table);
 
        // mark pfn as being shadowed, update field to point at shadow
-       set_shadow_status(p, gpfn, spfn | PSH_shadowed);
+       set_shadow_status(m, gpfn, spfn | PSH_shadowed);
        
        // we need to do this before the linear map is set up
        spl2e = (l2_pgentry_t *) map_domain_mem(spfn << PAGE_SHIFT);
@@ -315,33 +334,9 @@ static unsigned long shadow_l2_table(
 
        SH_VLOG("shadow_l2_table( %08lx -> %08lx)",gpfn,spfn);
 
-       spin_unlock(&p->mm.shadow_lock);
        return spfn;
 }
 
-pagetable_t shadow_mk_pagetable( struct task_struct *p, 
-                                                                                          unsigned long gptbase)
-{
-       unsigned long gpfn, spfn=0;
-
-       SH_VVLOG("shadow_mk_pagetable( gptbase=%08lx, mode=%d )",
-                        gptbase, p->mm.shadow_mode );
-
-       if ( likely(p->mm.shadow_mode) )  // should always be true if we're here
-       {
-               gpfn =  gptbase >> PAGE_SHIFT;
-               
-               if ( unlikely((spfn=__shadow_status(p, gpfn)) == 0 ) )
-               {
-                       spfn = shadow_l2_table(p, gpfn );
-               }      
-       }
-
-       SH_VVLOG("leaving shadow_mk_pagetable( gptbase=%08lx, mode=%d )",
-                        gptbase, p->mm.shadow_mode );
-
-       return mk_pagetable(spfn<<PAGE_SHIFT);
-}
 
 int shadow_fault( unsigned long va, long error_code )
 {
@@ -433,7 +428,7 @@ int shadow_fault( unsigned long va, long error_code )
         gl1pfn = gpde>>PAGE_SHIFT;
 
         
-        if ( ! (sl1pfn=__shadow_status(current, gl1pfn) ) )
+        if ( ! (sl1pfn=__shadow_status(&current->mm, gl1pfn) ) )
         {
             // this L1 is NOT already shadowed so we need to shadow it
             struct pfn_info *sl1pfn_info;
@@ -446,7 +441,7 @@ int shadow_fault( unsigned long va, long error_code )
                perfc_incrc(shadow_l1_table_count);
                perfc_incr(shadow_l1_pages);
 
-            set_shadow_status(current, gl1pfn, PSH_shadowed | sl1pfn);
+            set_shadow_status(&current->mm, gl1pfn, PSH_shadowed | sl1pfn);
 
             gpde = gpde | _PAGE_ACCESSED | _PAGE_DIRTY;
             spde = (gpde & ~PAGE_MASK) | _PAGE_RW | (sl1pfn<<PAGE_SHIFT);
@@ -530,13 +525,13 @@ void shadow_l1_normal_pt_update( unsigned long pa, unsigned long gpte,
     l1_pgentry_t * spl1e, * prev_spl1e = *prev_spl1e_ptr;
 
 
-SH_VVLOG("shadow_l1_normal_pt_update pa=%08lx, gpte=%08lx, prev_spfn=%08lx, prev_spl1e=%08lx\n",
+SH_VVLOG("shadow_l1_normal_pt_update pa=%08lx, gpte=%08lx, prev_spfn=%08lx, prev_spl1e=%p\n",
 pa,gpte,prev_spfn, prev_spl1e);
 
     // to get here, we know the l1 page *must* be shadowed
 
     gpfn = pa >> PAGE_SHIFT;
-    spfn = __shadow_status(current, gpfn) & PSH_pfn_mask;
+    spfn = __shadow_status(&current->mm, gpfn) & PSH_pfn_mask;
 
     if ( spfn == prev_spfn )
     {
@@ -581,13 +576,13 @@ void shadow_l2_normal_pt_update( unsigned long pa, unsigned long gpte )
     // to get here, we know the l2 page has a shadow
 
     gpfn = pa >> PAGE_SHIFT;
-    spfn = __shadow_status(current, gpfn) & PSH_pfn_mask;
+    spfn = __shadow_status(&current->mm, gpfn) & PSH_pfn_mask;
 
 
     spte = 0;
 
        if( gpte & _PAGE_PRESENT )
-               s_sh = __shadow_status(current, gpte >> PAGE_SHIFT);
+               s_sh = __shadow_status(&current->mm, gpte >> PAGE_SHIFT);
 
     sp2le = (l2_pgentry_t *) map_domain_mem( spfn << PAGE_SHIFT );
     // no real need for a cache here
@@ -622,7 +617,7 @@ char * sh_check_name;
 #define FAIL(_f, _a...)                             \
 {printk("XXX %s-FAIL (%d,%d)" _f " g=%08lx s=%08lx\n",  sh_check_name, level, i, ## _a , gpte, spte ); BUG();}
 
-static int check_pte( struct task_struct *p
+static int check_pte( struct mm_struct *m
                           unsigned long gpte, unsigned long spte, int level, int i )
 {
        unsigned long mask, gpfn, spfn;
@@ -680,7 +675,7 @@ static int check_pte( struct task_struct *p,
 }
 
 
-static int check_l1_table( struct task_struct *p, unsigned long va, 
+static int check_l1_table( struct mm_struct *m, unsigned long va, 
                                        unsigned long g2, unsigned long s2 )
 {
        int j;
@@ -709,7 +704,7 @@ static int check_l1_table( struct task_struct *p, unsigned long va,
 #define FAILPT(_f, _a...)                             \
 {printk("XXX FAIL %s-PT" _f "\n", s, ## _a ); BUG();}
 
-int check_pagetable( struct task_struct *p, pagetable_t pt, char *s )
+int check_pagetable( struct mm_struct *m, pagetable_t pt, char *s )
 {
        unsigned long gptbase = pagetable_val(pt);
        unsigned long gpfn, spfn;
index 08685b65a0b09238d3c08d482aeede60c1e7f9a7..60c70d90373631bfe566c75dea8ab5892e863bb6 100644 (file)
@@ -11,6 +11,7 @@
 #include <asm/types.h>
 #include <asm/cpufeature.h>
 #include <asm/desc.h>
+#include <asm/flushtlb.h>
 #include <xen/config.h>
 #include <xen/spinlock.h>
 #include <hypervisor-ifs/hypervisor-if.h>
@@ -233,6 +234,8 @@ static inline void clear_in_cr4 (unsigned long mask)
             :"ax");
 }
 
+
+
 /*
  *      Cyrix CPU configuration register indexes
  */
@@ -432,6 +435,22 @@ struct mm_struct {
     char gdt[6];
 };
 
+static inline void write_ptbase( struct mm_struct *m )
+{
+/*    printk("write_ptbase mode=%08x pt=%08lx st=%08lx\n",
+          m->shadow_mode, pagetable_val(m->pagetable),
+          pagetable_val(m->shadow_table) );
+ */
+    if( m->shadow_mode )
+      {
+       //check_pagetable( m, m->pagetable, "write_ptbase" );
+       write_cr3_counted(pagetable_val(m->shadow_table));
+      }
+    else
+      write_cr3_counted(pagetable_val(m->pagetable));
+}
+
+
 #define IDLE0_MM                                                    \
 {                                                                   \
     perdomain_pt: 0,                                                \
index bc5a6362ea4fc1667530199b93c1d58c062d5115..a4d91c0e1184951f5990c5bc2b80757165ce5819 100644 (file)
@@ -8,6 +8,7 @@
 #include <xen/perfc.h>
 #include <xen/sched.h>
 
+#include <asm/processor.h>
 #include <asm/pgalloc.h>
 #include <asm/atomic.h>
 #include <asm/desc.h>
index 2389fcae18c9a4d9056f465952ae6f1bb914513c..a9d03b4198e6a3ec7e74299ee2293ae58a5f87c3 100644 (file)
@@ -1,12 +1,13 @@
 /* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*- */
 
-#ifndef _XENO_SHADOW_H
-#define _XENO_SHADOW_H
+#ifndef _XEN_SHADOW_H
+#define _XEN_SHADOW_H
 
 #include <xen/config.h>
 #include <xen/types.h>
-#include <xen/mm.h>
 #include <xen/perfc.h>
+#include <asm/processor.h>
+
 
 /* Shadow PT flag bits in pfn_info */
 #define PSH_shadowed   (1<<31) /* page has a shadow. PFN points to shadow */
 #define shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START+(SH_LINEAR_PT_VIRT_START>>(L2_PAGETABLE_SHIFT-L1_PAGETABLE_SHIFT))))
 
 extern int shadow_mode_control( struct task_struct *p, unsigned int op );
-extern pagetable_t shadow_mk_pagetable( struct task_struct *p, 
-                                                                               unsigned long gptbase);
 extern int shadow_fault( unsigned long va, long error_code );
 extern void shadow_l1_normal_pt_update( unsigned long pa, unsigned long gpte, 
                                                                                unsigned long *prev_spfn_ptr,
                                                                                l1_pgentry_t **prev_spl1e_ptr  );
 extern void shadow_l2_normal_pt_update( unsigned long pa, unsigned long gpte );
 extern void unshadow_table( unsigned long gpfn, unsigned int type );
-extern int shadow_mode_enable( struct task_struct *p, unsigned int mode );
+extern int shadow_mode_enable( struct mm_struct *m, unsigned int mode );
+extern unsigned long shadow_l2_table( 
+                     struct mm_struct *m, unsigned long gpfn );
 
 #define SHADOW_DEBUG 0
 #define SHADOW_HASH_DEBUG 0
@@ -73,7 +74,7 @@ struct shadow_status {
 
 
 #if SHADOW_HASH_DEBUG
-static void shadow_audit(struct task_struct *p, int print)
+static void shadow_audit(struct mm_struct *m, int print)
 {
        int live=0, free=0, j=0, abs;
        struct shadow_status *a;
@@ -115,23 +116,25 @@ static void shadow_audit(struct task_struct *p, int print)
 #define shadow_audit(p, print)
 #endif
 
-static inline struct shadow_status* hash_bucket( struct task_struct *p,
+
+
+static inline struct shadow_status* hash_bucket( struct mm_struct *m,
                                                                                                 unsigned int gpfn )
 {
-    return &(p->mm.shadow_ht[gpfn % shadow_ht_buckets]);
+    return &(m->shadow_ht[gpfn % shadow_ht_buckets]);
 }
 
 
-static inline unsigned long __shadow_status( struct task_struct *p,
+static inline unsigned long __shadow_status( struct mm_struct *m,
                                                                                   unsigned int gpfn )
 {
-       struct shadow_status **ob, *b, *B = hash_bucket( p, gpfn );
+       struct shadow_status **ob, *b, *B = hash_bucket( m, gpfn );
 
     b = B;
     ob = NULL;
 
-       SH_VVLOG("lookup gpfn=%08lx bucket=%08lx", gpfn, b );
-       shadow_audit(p,0);  // if in debug mode
+       SH_VVLOG("lookup gpfn=%08x bucket=%p", gpfn, b );
+       shadow_audit(m,0);  // if in debug mode
 
        do
        {
@@ -172,33 +175,33 @@ static inline unsigned long __shadow_status( struct task_struct *p,
 ever becomes a problem, but since we need a spin lock on the hash table 
 anyway its probably not worth being too clever. */
 
-static inline unsigned long get_shadow_status( struct task_struct *p,
+static inline unsigned long get_shadow_status( struct mm_struct *m,
                                                                                   unsigned int gpfn )
 {
        unsigned long res;
 
-       spin_lock(&p->mm.shadow_lock);
-       res = __shadow_status( p, gpfn );
-       if (!res) spin_unlock(&p->mm.shadow_lock);
+       spin_lock(&m->shadow_lock);
+       res = __shadow_status( m, gpfn );
+       if (!res) spin_unlock(&m->shadow_lock);
        return res;
 }
 
 
-static inline void put_shadow_status( struct task_struct *p )
+static inline void put_shadow_status( struct mm_struct *m )
 {
-       spin_unlock(&p->mm.shadow_lock);
+       spin_unlock(&m->shadow_lock);
 }
 
 
-static inline void delete_shadow_status( struct task_struct *p,
+static inline void delete_shadow_status( struct mm_struct *m,
                                                                          unsigned int gpfn )
 {
        struct shadow_status *b, *B, **ob;
 
-       B = b = hash_bucket( p, gpfn );
+       B = b = hash_bucket( m, gpfn );
 
        SH_VVLOG("delete gpfn=%08x bucket=%p", gpfn, b );
-       shadow_audit(p,0);
+       shadow_audit(m,0);
        ASSERT(gpfn);
 
        if( b->pfn == gpfn )
@@ -210,8 +213,8 @@ static inline void delete_shadow_status( struct task_struct *p,
                        b->pfn = b->next->pfn;
 
                        b->next = b->next->next;
-                       D->next = p->mm.shadow_ht_free;
-                       p->mm.shadow_ht_free = D;
+                       D->next = m->shadow_ht_free;
+                       m->shadow_ht_free = D;
                }
                else
                {
@@ -220,7 +223,7 @@ static inline void delete_shadow_status( struct task_struct *p,
                }
 
 #if SHADOW_HASH_DEBUG
-               if( __shadow_status(p,gpfn) ) BUG();  
+               if( __shadow_status(m,gpfn) ) BUG();  
 #endif
                return;
     }
@@ -237,11 +240,11 @@ static inline void delete_shadow_status( struct task_struct *p,
 
                        // b is in the list
             *ob=b->next;
-                       b->next = p->mm.shadow_ht_free;
-                       p->mm.shadow_ht_free = b;
+                       b->next = m->shadow_ht_free;
+                       m->shadow_ht_free = b;
 
 #if SHADOW_HASH_DEBUG
-                       if( __shadow_status(p,gpfn) ) BUG();
+                       if( __shadow_status(m,gpfn) ) BUG();
 #endif
                        return;
                }
@@ -256,18 +259,18 @@ static inline void delete_shadow_status( struct task_struct *p,
 }
 
 
-static inline void set_shadow_status( struct task_struct *p,
+static inline void set_shadow_status( struct mm_struct *m,
                                                                          unsigned int gpfn, unsigned long s )
 {
        struct shadow_status *b, *B, *extra, **fptr;
     int i;
 
-       B = b = hash_bucket( p, gpfn );
+       B = b = hash_bucket( m, gpfn );
    
     ASSERT(gpfn);
     ASSERT(s);
     SH_VVLOG("set gpfn=%08x s=%08lx bucket=%p(%p)", gpfn, s, b, b->next );
-    shadow_audit(p,0);
+    shadow_audit(m,0);
 
        do
        {
@@ -294,7 +297,7 @@ static inline void set_shadow_status( struct task_struct *p,
                return;
        }
 
-    if( unlikely(p->mm.shadow_ht_free == NULL) )
+    if( unlikely(m->shadow_ht_free == NULL) )
     {
         SH_LOG("allocate more shadow hashtable blocks");
 
@@ -308,7 +311,7 @@ static inline void set_shadow_status( struct task_struct *p,
                                                           sizeof(struct shadow_status)) );
        
         // add extras to free list
-           fptr = &p->mm.shadow_ht_free;
+           fptr = &m->shadow_ht_free;
            for ( i=0; i<shadow_ht_extra_size; i++ )
            {
                    *fptr = &extra[i];
@@ -316,15 +319,15 @@ static inline void set_shadow_status( struct task_struct *p,
            }
            *fptr = NULL;
 
-           *((struct shadow_status ** ) &p->mm.shadow_ht[shadow_ht_extra_size]) = 
-                                            p->mm.shadow_ht_extras;
-        p->mm.shadow_ht_extras = extra;
+           *((struct shadow_status ** ) &m->shadow_ht[shadow_ht_extra_size]) = 
+                                            m->shadow_ht_extras;
+        m->shadow_ht_extras = extra;
 
     }
 
        // should really put this in B to go right to front
-       b = p->mm.shadow_ht_free;
-    p->mm.shadow_ht_free = b->next;
+       b = m->shadow_ht_free;
+    m->shadow_ht_free = b->next;
     b->spfn_and_flags = s;
        b->pfn = gpfn;
        b->next = B->next;
@@ -333,13 +336,39 @@ static inline void set_shadow_status( struct task_struct *p,
        return;
 }
 
+static inline void shadow_mk_pagetable( struct mm_struct *mm )
+{
+       unsigned long gpfn, spfn=0;
+
+       SH_VVLOG("shadow_mk_pagetable( gptbase=%08lx, mode=%d )",
+                        pagetable_val(mm->pagetable), mm->shadow_mode );
+
+       if ( unlikely(mm->shadow_mode) )
+       {
+               gpfn =  pagetable_val(mm->pagetable) >> PAGE_SHIFT;
+               
+        spin_lock(&mm->shadow_lock);
+               if ( unlikely((spfn=__shadow_status(mm, gpfn)) == 0 ) )
+               {
+                       spfn = shadow_l2_table(mm, gpfn );
+               }      
+               mm->shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
+        spin_unlock(&mm->shadow_lock);         
+       }
+
+       SH_VVLOG("leaving shadow_mk_pagetable( gptbase=%08lx, mode=%d ) sh=%08lx",
+                        pagetable_val(mm->pagetable), mm->shadow_mode, 
+                        pagetable_val(mm->shadow_table) );
+
+}
+
 
 
 #if SHADOW_DEBUG
-extern int check_pagetable( struct task_struct *p, pagetable_t pt, char *s );
+extern int check_pagetable( struct mm_struct *m, pagetable_t pt, char *s );
 #else
-#define check_pagetable( p, pt, s )
+#define check_pagetable( m, pt, s )
 #endif
 
 
-#endif
+#endif /* XEN_SHADOW_H */
index e7e277f5e5dd0e49c7a468b6738c9faa42996ffd..31d80e2a1476ad15f695a0e8f353fa3fe0a811a3 100644 (file)
@@ -548,7 +548,7 @@ void deliver_packet(struct sk_buff *skb, net_vif_t *vif)
     }
 
     if ( p->mm.shadow_mode && 
-        (spte_pfn=get_shadow_status(p, pte_page-frame_table)) )
+        (spte_pfn=get_shadow_status(&p->mm, pte_page-frame_table)) )
     {
        unsigned long *sptr = map_domain_mem( (spte_pfn<<PAGE_SHIFT) |
                        (((unsigned long)ptep)&~PAGE_MASK) );
@@ -557,7 +557,7 @@ void deliver_packet(struct sk_buff *skb, net_vif_t *vif)
        *sptr = new_pte;
 
        unmap_domain_mem(sptr);
-       put_shadow_status(p);
+       put_shadow_status(&p->mm);
     }
 
     machine_to_phys_mapping[new_page - frame_table] 
@@ -2113,14 +2113,14 @@ static void get_rx_bufs(net_vif_t *vif)
         }
 
        if ( p->mm.shadow_mode && 
-            (spfn=get_shadow_status(p, rx.addr>>PAGE_SHIFT)) )
+            (spfn=get_shadow_status(&p->mm, rx.addr>>PAGE_SHIFT)) )
          {
            unsigned long * sptr = 
              map_domain_mem( (spfn<<PAGE_SHIFT) | (rx.addr&~PAGE_MASK) );
 
            *sptr = 0;
            unmap_domain_mem( sptr );
-           put_shadow_status(p);
+           put_shadow_status(&p->mm);
          }
         
         buf_pfn  = pte >> PAGE_SHIFT;